package org.apache.lucene.index;

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Locale;
import java.util.Map;


/**
 * <p>This class implements a {@link MergePolicy} that tries
 * to merge segments into levels of exponentially
 * increasing size, where each level has fewer segments than
 * the value of the merge factor. Whenever extra segments
 * (beyond the merge factor upper bound) are encountered,
 * all segments within the level are merged. You can get or
 * set the merge factor using {@link #getMergeFactor()} and
 * {@link #setMergeFactor(int)} respectively.</p>
 *
 * <p>This class is abstract and requires a subclass to
 * define the {@link #size} method which specifies how a
 * segment's size is determined.  {@link LogDocMergePolicy}
 * is one subclass that measures size by document count in
 * the segment.  {@link LogByteSizeMergePolicy} is another
 * subclass that measures size as the total byte size of the
 * file(s) for the segment.</p>
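 *
 * <p>A minimal configuration sketch (assuming the {@link
 * LogDocMergePolicy} subclass; {@code analyzer} and {@code directory}
 * stand in for an existing {@link org.apache.lucene.analysis.Analyzer}
 * and {@link org.apache.lucene.store.Directory}):</p>
 *
 * <pre class="prettyprint">
 * LogMergePolicy mergePolicy = new LogDocMergePolicy();
 * mergePolicy.setMergeFactor(10);   // how many segments to merge at a time
 * IndexWriterConfig config = new IndexWriterConfig(analyzer);
 * config.setMergePolicy(mergePolicy);
 * IndexWriter writer = new IndexWriter(directory, config);
 * </pre>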
 */

public abstract class LogMergePolicy extends MergePolicy {

  /** Defines the allowed range of log(size) for each
   *  level.  A level is computed by taking the max segment
   *  log size, minus LEVEL_LOG_SPAN, and finding all
   *  segments falling within that range. */
  public static final double LEVEL_LOG_SPAN = 0.75;
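  // Since levels are computed as log base mergeFactor of the segment size,
  // with the default mergeFactor of 10 a span of 0.75 covers roughly a
  // 5.6x (10^0.75) range of segment sizes per level.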

  /** Default merge factor, which is how many segments are
   *  merged at a time */
  public static final int DEFAULT_MERGE_FACTOR = 10;

  /** Default maximum segment size.  A segment of this size
   *  or larger will never be merged.  @see setMaxMergeDocs */
  public static final int DEFAULT_MAX_MERGE_DOCS = Integer.MAX_VALUE;

  /** Default noCFSRatio.  If a merge's size is {@code >= 10%} of
   *  the index, then we disable compound file for it.
   *  @see MergePolicy#setNoCFSRatio */
  public static final double DEFAULT_NO_CFS_RATIO = 0.1;

  /** How many segments to merge at a time. */
  protected int mergeFactor = DEFAULT_MERGE_FACTOR;

  /** Any segments whose size is smaller than this value
   *  will be rounded up to this value.  This ensures that
   *  tiny segments are aggressively merged. */
  protected long minMergeSize;

  /** If the size of a segment exceeds this value then it
   *  will never be merged. */
  protected long maxMergeSize;

  // Although the core merge policies set it explicitly, we must provide a
  // default in case someone out there wrote their own LMP subclass ...
  /** If the size of a segment exceeds this value then it
   * will never be merged during {@link IndexWriter#forceMerge}. */
  protected long maxMergeSizeForForcedMerge = Long.MAX_VALUE;

  /** If a segment has more than this many documents then it
   *  will never be merged. */
  protected int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;

  /** If true, we pro-rate a segment's size by the
   *  percentage of non-deleted documents. */
  protected boolean calibrateSizeByDeletes = true;

  /** Sole constructor. (For invocation by subclass
   *  constructors, typically implicit.) */
  public LogMergePolicy() {
    super(DEFAULT_NO_CFS_RATIO, MergePolicy.DEFAULT_MAX_CFS_SEGMENT_SIZE);
  }

  /** Returns true if {@code LMP} is enabled in {@link
   *  IndexWriter}'s {@code infoStream}. */
  protected boolean verbose(IndexWriter writer) {
    return writer != null && writer.infoStream.isEnabled("LMP");
  }

  /** Print a debug message to {@link IndexWriter}'s {@code
   *  infoStream}. */
  protected void message(String message, IndexWriter writer) {
    if (verbose(writer)) {
      writer.infoStream.message("LMP", message);
    }
  }

  /** <p>Returns the number of segments that are merged at
   * once and also controls the total number of segments
   * allowed to accumulate in the index.</p> */
  public int getMergeFactor() {
    return mergeFactor;
  }

  /** Determines how often segment indices are merged by
   * addDocument().  With smaller values, less RAM is used
   * while indexing, and searches are faster, but indexing
   * speed is slower.  With larger values, more RAM is used
   * during indexing, and while searches are slower, indexing
   * is faster.  Thus larger values ({@code > 10}) are best for
   * batch index creation, and smaller values ({@code < 10})
   * for indices that are interactively maintained. */
  public void setMergeFactor(int mergeFactor) {
    if (mergeFactor < 2)
      throw new IllegalArgumentException("mergeFactor cannot be less than 2");
    this.mergeFactor = mergeFactor;
  }

  /** Sets whether the segment size should be calibrated by
   *  the number of deletes when choosing segments for merge. */
  public void setCalibrateSizeByDeletes(boolean calibrateSizeByDeletes) {
    this.calibrateSizeByDeletes = calibrateSizeByDeletes;
  }

  /** Returns true if the segment size should be calibrated
   *  by the number of deletes when choosing segments for merge. */
  public boolean getCalibrateSizeByDeletes() {
    return calibrateSizeByDeletes;
  }

  /** Return the number of documents in the provided {@link
   *  SegmentCommitInfo}, pro-rated by percentage of
   *  non-deleted documents if {@link
   *  #setCalibrateSizeByDeletes} is set. */
  protected long sizeDocs(SegmentCommitInfo info, IndexWriter writer) throws IOException {
    if (calibrateSizeByDeletes) {
      int delCount = writer.numDeletedDocs(info);
      assert delCount <= info.info.maxDoc();
      return (info.info.maxDoc() - (long)delCount);
    } else {
      return info.info.maxDoc();
    }
  }

  /** Return the byte size of the provided {@link
   *  SegmentCommitInfo}, pro-rated by percentage of
   *  non-deleted documents if {@link
   *  #setCalibrateSizeByDeletes} is set. */
  protected long sizeBytes(SegmentCommitInfo info, IndexWriter writer) throws IOException {
    if (calibrateSizeByDeletes) {
      return super.size(info, writer);
    }
    return info.sizeInBytes();
  }

  /** Returns true if the number of segments eligible for
   *  merging is less than or equal to the specified {@code
   *  maxNumSegments}. */
  protected boolean isMerged(SegmentInfos infos, int maxNumSegments, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer) throws IOException {
    final int numSegments = infos.size();
    int numToMerge = 0;
    SegmentCommitInfo mergeInfo = null;
    boolean segmentIsOriginal = false;
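    // Count the segments that were requested for this merge (those present in
    // segmentsToMerge); the loop stops early once the count exceeds
    // maxNumSegments.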
    for(int i=0;i<numSegments && numToMerge <= maxNumSegments;i++) {
      final SegmentCommitInfo info = infos.info(i);
      final Boolean isOriginal = segmentsToMerge.get(info);
      if (isOriginal != null) {
        segmentIsOriginal = isOriginal;
        numToMerge++;
        mergeInfo = info;
      }
    }

    return numToMerge <= maxNumSegments &&
      (numToMerge != 1 || !segmentIsOriginal || isMerged(infos, mergeInfo, writer));
  }

  /**
   * Returns the merges necessary to merge the index, taking the max merge
   * size or max merge docs into consideration. This method attempts to respect
   * the {@code maxNumSegments} parameter, but due to size constraints more
   * than that number of segments may remain in the index. It also does not
   * guarantee that exactly {@code maxNumSegments} segments will remain; the
   * result may contain fewer.
   */
  private MergeSpecification findForcedMergesSizeLimit(
      SegmentInfos infos, int maxNumSegments, int last, IndexWriter writer) throws IOException {
    MergeSpecification spec = new MergeSpecification();
    final List<SegmentCommitInfo> segments = infos.asList();

    int start = last - 1;
    while (start >= 0) {
      SegmentCommitInfo info = infos.info(start);
      if (size(info, writer) > maxMergeSizeForForcedMerge || sizeDocs(info, writer) > maxMergeDocs) {
        if (verbose(writer)) {
          message("findForcedMergesSizeLimit: skip segment=" + info + ": size is > maxMergeSize (" + maxMergeSizeForForcedMerge + ") or sizeDocs is > maxMergeDocs (" + maxMergeDocs + ")", writer);
        }
        // need to skip that segment + add a merge for the 'right' segments,
        // unless there is only 1 which is merged.
        if (last - start - 1 > 1 || (start != last - 1 && !isMerged(infos, infos.info(start + 1), writer))) {
          // there is more than 1 segment to the right of
          // this one, or a mergeable single segment.
          spec.add(new OneMerge(segments.subList(start + 1, last)));
        }
        last = start;
      } else if (last - start == mergeFactor) {
        // mergeFactor eligible segments were found, add them as a merge.
        spec.add(new OneMerge(segments.subList(start, last)));
        last = start;
      }
      --start;
    }

    // Add any left-over segments, unless there is just 1
    // already fully merged
    if (last > 0 && (++start + 1 < last || !isMerged(infos, infos.info(start), writer))) {
      spec.add(new OneMerge(segments.subList(start, last)));
    }

    return spec.merges.size() == 0 ? null : spec;
  }

  /**
   * Returns the merges necessary to forceMerge the index. This method constrains
   * the returned merges only by the {@code maxNumSegments} parameter, and
   * guarantees that exactly that number of segments will remain in the index.
   */
  private MergeSpecification findForcedMergesMaxNumSegments(SegmentInfos infos, int maxNumSegments, int last, IndexWriter writer) throws IOException {
    MergeSpecification spec = new MergeSpecification();
    final List<SegmentCommitInfo> segments = infos.asList();

    // First, enroll all "full" merges (size
    // mergeFactor) to potentially be run concurrently:
    while (last - maxNumSegments + 1 >= mergeFactor) {
      spec.add(new OneMerge(segments.subList(last - mergeFactor, last)));
      last -= mergeFactor;
    }

    // Only if there are no full merges pending do we
    // add a final partial (< mergeFactor segments) merge:
    if (0 == spec.merges.size()) {
      if (maxNumSegments == 1) {

        // Since we must merge down to 1 segment, the
        // choice is simple:
        if (last > 1 || !isMerged(infos, infos.info(0), writer)) {
          spec.add(new OneMerge(segments.subList(0, last)));
        }
      } else if (last > maxNumSegments) {

        // Take care to pick a partial merge that is
        // least cost, but does not make the index too
        // lopsided.  If we always just picked the
        // partial tail then we could produce a highly
        // lopsided index over time:

        // We must merge this many segments to leave
        // maxNumSegments in the index (from when
        // forceMerge was first kicked off):
        final int finalMergeSize = last - maxNumSegments + 1;
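        // For example (illustrative numbers): with last=15 eligible segments
        // and maxNumSegments=10, we merge 15 - 10 + 1 = 6 segments into one,
        // leaving exactly 10 segments.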

        // Consider all possible starting points:
        long bestSize = 0;
        int bestStart = 0;

        for(int i=0;i<last-finalMergeSize+1;i++) {
          long sumSize = 0;
          for(int j=0;j<finalMergeSize;j++) {
            sumSize += size(infos.info(j+i), writer);
          }
          if (i == 0 || (sumSize < 2*size(infos.info(i-1), writer) && sumSize < bestSize)) {
            bestStart = i;
            bestSize = sumSize;
          }
        }

        spec.add(new OneMerge(segments.subList(bestStart, bestStart + finalMergeSize)));
      }
    }
    return spec.merges.size() == 0 ? null : spec;
  }


  /** Returns the merges necessary to merge the index down
   *  to a specified number of segments.
   *  This respects the {@link #maxMergeSizeForForcedMerge} setting.
   *  By default, and assuming {@code maxNumSegments=1}, only
   *  one segment will be left in the index, where that segment
   *  has no deletions pending nor separate norms, and it is in
   *  compound file format if the current useCompoundFile
   *  setting is true.  This method returns multiple merges
   *  (mergeFactor at a time) so the {@link MergeScheduler}
   *  in use may make use of concurrency. */
  @Override
  public MergeSpecification findForcedMerges(SegmentInfos infos,
            int maxNumSegments, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer) throws IOException {

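    // Note: applications normally do not call this directly; IndexWriter
    // invokes it as part of forceMerge, e.g. writer.forceMerge(1).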
    assert maxNumSegments > 0;
    if (verbose(writer)) {
      message("findForcedMerges: maxNumSegs=" + maxNumSegments + " segsToMerge="+ segmentsToMerge, writer);
    }

    // If the segments are already merged (e.g. there's only 1 segment), or
    // there are no more than maxNumSegments eligible segments, there is
    // nothing to do:
    if (isMerged(infos, maxNumSegments, segmentsToMerge, writer)) {
      if (verbose(writer)) {
        message("already merged; skip", writer);
      }
      return null;
    }

    // Find the newest (rightmost) segment that needs to
    // be merged (other segments may have been flushed
    // since merging started):
    int last = infos.size();
    while (last > 0) {
      final SegmentCommitInfo info = infos.info(--last);
      if (segmentsToMerge.get(info) != null) {
        last++;
        break;
      }
    }

    if (last == 0) {
      if (verbose(writer)) {
        message("last == 0; skip", writer);
      }
      return null;
    }

    // There is only one segment already, and it is merged
    if (maxNumSegments == 1 && last == 1 && isMerged(infos, infos.info(0), writer)) {
      if (verbose(writer)) {
        message("already 1 seg; skip", writer);
      }
      return null;
    }

    // Check if there are any segments above the threshold
    boolean anyTooLarge = false;
    for (int i = 0; i < last; i++) {
      SegmentCommitInfo info = infos.info(i);
      if (size(info, writer) > maxMergeSizeForForcedMerge || sizeDocs(info, writer) > maxMergeDocs) {
        anyTooLarge = true;
        break;
      }
    }

    if (anyTooLarge) {
      return findForcedMergesSizeLimit(infos, maxNumSegments, last, writer);
    } else {
      return findForcedMergesMaxNumSegments(infos, maxNumSegments, last, writer);
    }
  }

  /**
   * Finds merges necessary to force-merge all deletes from the
   * index.  We simply merge adjacent segments that have
   * deletes, up to mergeFactor at a time.
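   * This is normally invoked via {@link IndexWriter#forceMergeDeletes}.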
   */
  @Override
  public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos, IndexWriter writer)
      throws IOException {
    final List<SegmentCommitInfo> segments = segmentInfos.asList();
    final int numSegments = segments.size();

    if (verbose(writer)) {
      message("findForcedDeletesMerges: " + numSegments + " segments", writer);
    }

    MergeSpecification spec = new MergeSpecification();
    int firstSegmentWithDeletions = -1;
    assert writer != null;
    for(int i=0;i<numSegments;i++) {
      final SegmentCommitInfo info = segmentInfos.info(i);
      int delCount = writer.numDeletedDocs(info);
      if (delCount > 0) {
        if (verbose(writer)) {
          message("  segment " + info.info.name + " has deletions", writer);
        }
        if (firstSegmentWithDeletions == -1)
          firstSegmentWithDeletions = i;
        else if (i - firstSegmentWithDeletions == mergeFactor) {
          // We've seen mergeFactor segments in a row with
          // deletions, so force a merge now:
          if (verbose(writer)) {
            message("  add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive", writer);
          }
          spec.add(new OneMerge(segments.subList(firstSegmentWithDeletions, i)));
          firstSegmentWithDeletions = i;
        }
      } else if (firstSegmentWithDeletions != -1) {
        // End of a sequence of segments with deletions, so,
        // merge those past segments even if it's fewer than
        // mergeFactor segments
        if (verbose(writer)) {
          message("  add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive", writer);
        }
        spec.add(new OneMerge(segments.subList(firstSegmentWithDeletions, i)));
        firstSegmentWithDeletions = -1;
      }
    }

    if (firstSegmentWithDeletions != -1) {
      if (verbose(writer)) {
        message("  add merge " + firstSegmentWithDeletions + " to " + (numSegments-1) + " inclusive", writer);
      }
      spec.add(new OneMerge(segments.subList(firstSegmentWithDeletions, numSegments)));
    }

    return spec;
  }

  private static class SegmentInfoAndLevel implements Comparable<SegmentInfoAndLevel> {
    SegmentCommitInfo info;
    float level;
    int index;

    public SegmentInfoAndLevel(SegmentCommitInfo info, float level, int index) {
      this.info = info;
      this.level = level;
      this.index = index;
    }

    // Sorts largest to smallest
    @Override
    public int compareTo(SegmentInfoAndLevel other) {
      return Float.compare(other.level, level);
    }
  }

  /** Checks if any merges are now necessary and returns a
   *  {@link MergePolicy.MergeSpecification} if so.  A merge
   *  is necessary when there are more than {@link
   *  #setMergeFactor} segments at a given level.  When
   *  multiple levels have too many segments, this method
   *  will return multiple merges, allowing the {@link
   *  MergeScheduler} to use concurrency. */
  @Override
  public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos infos, IndexWriter writer) throws IOException {

    final int numSegments = infos.size();
    if (verbose(writer)) {
      message("findMerges: " + numSegments + " segments", writer);
    }

    // Compute levels, which is just log (base mergeFactor)
    // of the size of each segment
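    // (For example, under LogDocMergePolicy with mergeFactor=10, a segment
    // of 1,000 docs gets level log(1000)/log(10) = 3.)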
    final List<SegmentInfoAndLevel> levels = new ArrayList<>(numSegments);
    final float norm = (float) Math.log(mergeFactor);

    final Collection<SegmentCommitInfo> mergingSegments = writer.getMergingSegments();

    for(int i=0;i<numSegments;i++) {
      final SegmentCommitInfo info = infos.info(i);
      long size = size(info, writer);

      // Floor tiny segments
      if (size < 1) {
        size = 1;
      }

      final SegmentInfoAndLevel infoLevel = new SegmentInfoAndLevel(info, (float) Math.log(size)/norm, i);
      levels.add(infoLevel);

      if (verbose(writer)) {
        final long segBytes = sizeBytes(info, writer);
        String extra = mergingSegments.contains(info) ? " [merging]" : "";
        if (size >= maxMergeSize) {
          extra += " [skip: too large]";
        }
        message("seg=" + writer.segString(info) + " level=" + infoLevel.level + " size=" + String.format(Locale.ROOT, "%.3f MB", segBytes/1024/1024.) + extra, writer);
      }
    }

    final float levelFloor;
    if (minMergeSize <= 0)
      levelFloor = (float) 0.0;
    else
      levelFloor = (float) (Math.log(minMergeSize)/norm);

    // Now, we quantize the log values into levels.  The
    // first level is any segment whose log size is within
    // LEVEL_LOG_SPAN of the max size, or that has such a
    // segment "to the right".  Then, we find the max of all
    // other segments and use that to define the next level,
    // etc.
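    // (For example, with a current max level of 4.0, this pass would group
    // everything at level >= 4.0 - 0.75 = 3.25, subject to the level floor
    // computed above.)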

    MergeSpecification spec = null;

    final int numMergeableSegments = levels.size();

    int start = 0;
    while(start < numMergeableSegments) {

      // Find max level of all segments not already
      // quantized.
      float maxLevel = levels.get(start).level;
      for(int i=1+start;i<numMergeableSegments;i++) {
        final float level = levels.get(i).level;
        if (level > maxLevel) {
          maxLevel = level;
        }
      }

      // Now search backwards for the rightmost segment that
      // falls into this level:
      float levelBottom;
      if (maxLevel <= levelFloor) {
        // All remaining segments fall into the min level
        levelBottom = -1.0F;
      } else {
        levelBottom = (float) (maxLevel - LEVEL_LOG_SPAN);

        // Force a boundary at the level floor
        if (levelBottom < levelFloor && maxLevel >= levelFloor) {
          levelBottom = levelFloor;
        }
      }

      int upto = numMergeableSegments-1;
      while(upto >= start) {
        if (levels.get(upto).level >= levelBottom) {
          break;
        }
        upto--;
      }
      if (verbose(writer)) {
        message("  level " + levelBottom + " to " + maxLevel + ": " + (1+upto-start) + " segments", writer);
      }

      // Finally, record all merges that are viable at this level:
      int end = start + mergeFactor;
      while(end <= 1+upto) {
        boolean anyTooLarge = false;
        boolean anyMerging = false;
        for(int i=start;i<end;i++) {
          final SegmentCommitInfo info = levels.get(i).info;
          anyTooLarge |= (size(info, writer) >= maxMergeSize || sizeDocs(info, writer) >= maxMergeDocs);
          if (mergingSegments.contains(info)) {
            anyMerging = true;
            break;
          }
        }

        if (anyMerging) {
          // skip
        } else if (!anyTooLarge) {
          if (spec == null)
            spec = new MergeSpecification();
          final List<SegmentCommitInfo> mergeInfos = new ArrayList<>(end-start);
          for(int i=start;i<end;i++) {
            mergeInfos.add(levels.get(i).info);
            assert infos.contains(levels.get(i).info);
          }
          if (verbose(writer)) {
            message("  add merge=" + writer.segString(mergeInfos) + " start=" + start + " end=" + end, writer);
          }
          spec.add(new OneMerge(mergeInfos));
        } else if (verbose(writer)) {
          message("    " + start + " to " + end + ": contains segment over maxMergeSize or maxMergeDocs; skipping", writer);
        }

        start = end;
        end = start + mergeFactor;
      }

      start = 1+upto;
    }

    return spec;
  }

  /** <p>Determines the largest segment (measured by
   * document count) that may be merged with other segments.
   * Small values (e.g., less than 10,000) are best for
   * interactive indexing, as this limits the length of
   * pauses while indexing to a few seconds.  Larger values
   * are best for batched indexing and speedier
   * searches.</p>
   *
   * <p>The default value is {@link Integer#MAX_VALUE}.</p>
   *
   * <p>The default merge policy ({@link
   * LogByteSizeMergePolicy}) also allows you to set this
   * limit by net size (in MB) of the segment, using {@link
   * LogByteSizeMergePolicy#setMaxMergeMB}.</p>
   */
  public void setMaxMergeDocs(int maxMergeDocs) {
    this.maxMergeDocs = maxMergeDocs;
  }

  /** Returns the largest segment (measured by document
   *  count) that may be merged with other segments.
   *  @see #setMaxMergeDocs */
  public int getMaxMergeDocs() {
    return maxMergeDocs;
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("[" + getClass().getSimpleName() + ": ");
    sb.append("minMergeSize=").append(minMergeSize).append(", ");
    sb.append("mergeFactor=").append(mergeFactor).append(", ");
    sb.append("maxMergeSize=").append(maxMergeSize).append(", ");
    sb.append("maxMergeSizeForForcedMerge=").append(maxMergeSizeForForcedMerge).append(", ");
    sb.append("calibrateSizeByDeletes=").append(calibrateSizeByDeletes).append(", ");
    sb.append("maxMergeDocs=").append(maxMergeDocs).append(", ");
    sb.append("maxCFSSegmentSizeMB=").append(getMaxCFSSegmentSizeMB()).append(", ");
    sb.append("noCFSRatio=").append(noCFSRatio);
    sb.append("]");
    return sb.toString();
  }

}